Before you start

Set my seed

# Any number can be chosen; fixing the seed makes the random sampling
# below reproducible across re-runs of this document
set.seed(567890)

Goals for this file

  1. Use raw fastq files and generate quality plots to assess the quality of the reads

  2. Filter and trim out bad sequences and bases from our sequencing files

  3. Write out fastq files with high quality sequences

  4. Evaluate the quality from our filter and trim.

  5. Infer errors on forward and reverse reads individually

  6. Identify ASVs on forward and reverse reads separately using the error model.

  7. Merge forward and reverse ASVs into “contiguous ASVs”.

  8. Generate ASV count table. (otu_table input for phyloseq.).

Output that we need:

  1. ASV count table: otu_table

  2. Sample information: a sample_table that tracks the reads lost throughout the DADA2 workflow.

Load Libraries

# Efficient package loading with pacman
# install = FALSE: assumes all packages are already installed (fails otherwise)
pacman::p_load(tidyverse, devtools, dada2, phyloseq, patchwork, DT,
               install = FALSE)

Load Data

ERR3585832 and a second sample (listed here twice as ERR3585832 — verify the intended second accession) have been removed due to containing zero reads

# Set the raw fastq path to the raw sequencing files
# Path to the primer-trimmed (cutadapt) fastq files
raw_fastqs_path <- "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files"
raw_fastqs_path
## [1] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files"
# What files are in this path? (Intuition check)
list.files(raw_fastqs_path)
##  [1] "ERR3585831_trim_1.fastq.gz" "ERR3585831_trim_2.fastq.gz"
##  [3] "ERR3585834_trim_1.fastq.gz" "ERR3585834_trim_2.fastq.gz"
##  [5] "ERR3585835_trim_1.fastq.gz" "ERR3585835_trim_2.fastq.gz"
##  [7] "ERR3585837_trim_1.fastq.gz" "ERR3585837_trim_2.fastq.gz"
##  [9] "ERR3585838_trim_1.fastq.gz" "ERR3585838_trim_2.fastq.gz"
## [11] "ERR3585840_trim_1.fastq.gz" "ERR3585840_trim_2.fastq.gz"
## [13] "ERR3585843_trim_1.fastq.gz" "ERR3585843_trim_2.fastq.gz"
## [15] "ERR3585844_trim_1.fastq.gz" "ERR3585844_trim_2.fastq.gz"
## [17] "ERR3585846_trim_1.fastq.gz" "ERR3585846_trim_2.fastq.gz"
# How many files are there? (expect two per sample: forward + reverse)
str(list.files(raw_fastqs_path))
##  chr [1:18] "ERR3585831_trim_1.fastq.gz" "ERR3585831_trim_2.fastq.gz" ...
# Create a vector of forward (R1) read paths
# full.names = TRUE keeps the directory prefix so the files can be opened later
forward_reads <- 
  list.files(raw_fastqs_path, pattern = "_trim_1.fastq.gz", full.names = TRUE) 
# Intuition check
head(forward_reads)
## [1] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585831_trim_1.fastq.gz"
## [2] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585834_trim_1.fastq.gz"
## [3] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585835_trim_1.fastq.gz"
## [4] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585837_trim_1.fastq.gz"
## [5] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585838_trim_1.fastq.gz"
## [6] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585840_trim_1.fastq.gz"
# Create a vector of reverse (R2) read paths
reverse_reads <-
  list.files(raw_fastqs_path, pattern = "_trim_2.fastq.gz", full.names = TRUE)
# Intuition check
head(reverse_reads)
## [1] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585831_trim_2.fastq.gz"
## [2] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585834_trim_2.fastq.gz"
## [3] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585835_trim_2.fastq.gz"
## [4] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585837_trim_2.fastq.gz"
## [5] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585838_trim_2.fastq.gz"
## [6] "data/00_cutadapt/01_raw_gzipped_fastqs/trimmed_files/ERR3585840_trim_2.fastq.gz"

Assess Raw Read Quality

Evaluate raw sequence quality

Let’s see the quality of the raw reads before we trim

Plot quality profiles for the 9 samples in a random order

# Randomly order the sample indices to evaluate.
# NOTE(review): there are only 9 samples in this dataset, so size = 9 uses
# every sample in a random order (the prose above mentions 12; with 9 files,
# 9 is the maximum possible).
# seq_along() is the safe idiom (1:length(x) misbehaves on empty vectors)
# and draws identically to 1:length(x) here, so the seeded result is unchanged.
random_samples <- sample(seq_along(reverse_reads), size = 9)
random_samples
## [1] 6 7 1 9 3 8 4 2 5
# Calculate and plot the per-cycle quality of the selected samples
forward_filteredQual_plot_all <-
  plotQualityProfile(forward_reads[random_samples]) +
  labs(title = "Forward Read Raw Quality")

reverse_filteredQual_plot_all <-
  plotQualityProfile(reverse_reads[random_samples]) +
  labs(title = "Reverse Read Raw Quality")


# Plot them side by side with patchwork
forward_filteredQual_plot_all + reverse_filteredQual_plot_all

Prepare a placeholder for filtered reads

# Extract the sample ID from a fastq file path: everything in the basename
# before the first underscore, e.g.
# "data/.../ERR3585831_trim_1.fastq.gz" -> "ERR3585831".
get_name <- function(s) {
  pieces <- strsplit(basename(s), "_")[[1]]
  pieces[1]
}

# Vector of sample IDs, extracted from the forward-read file names
samples <-sapply(forward_reads, get_name, USE.NAMES = FALSE)

# Intuition check
head(samples)
## [1] "ERR3585831" "ERR3585834" "ERR3585835" "ERR3585837" "ERR3585838"
## [6] "ERR3585840"
# Filtered reads will be written into filtered_fastqs_path
filtered_fastqs_path <- "data/01_DADA2/02_filtered_fastqs"
filtered_fastqs_path
## [1] "data/01_DADA2/02_filtered_fastqs"
# Output paths for the filtered forward (R1) reads, one per sample
filtered_forward_reads <- 
  file.path(filtered_fastqs_path, paste0(samples, "_R1_filtered.fastq.gz"))

# Output paths for the filtered reverse (R2) reads, one per sample
filtered_reverse_reads <- 
  file.path(filtered_fastqs_path, paste0(samples, "_R2_filtered.fastq.gz"))

# Intuition check
head(filtered_forward_reads)
## [1] "data/01_DADA2/02_filtered_fastqs/ERR3585831_R1_filtered.fastq.gz"
## [2] "data/01_DADA2/02_filtered_fastqs/ERR3585834_R1_filtered.fastq.gz"
## [3] "data/01_DADA2/02_filtered_fastqs/ERR3585835_R1_filtered.fastq.gz"
## [4] "data/01_DADA2/02_filtered_fastqs/ERR3585837_R1_filtered.fastq.gz"
## [5] "data/01_DADA2/02_filtered_fastqs/ERR3585838_R1_filtered.fastq.gz"
## [6] "data/01_DADA2/02_filtered_fastqs/ERR3585840_R1_filtered.fastq.gz"
length(filtered_forward_reads)
## [1] 9

Aggregated Raw Quality Plots

# Aggregate the raw quality profiles across ALL samples into one panel
# per read direction
# Forward reads
forward_preQC_plot <-
  plotQualityProfile(forward_reads, aggregate = TRUE) +
  labs(title = "Forward Pre-QC")

# Reverse reads
reverse_preQC_plot <-
  plotQualityProfile(reverse_reads, aggregate = TRUE) +
  labs(title = "Reverse Pre-QC")

preQC_aggregate_plot <-
  # Combine the forward and reverse panels side by side (patchwork)
  forward_preQC_plot + reverse_preQC_plot

# Show the plot
preQC_aggregate_plot

Filter and Trim Reads

Parameters of filter and trim DEPEND ON THE DATASET

  • maxN = number of N bases. Remove all Ns from the data.
  • maxEE = quality filtering threshold applied to expected errors. By default (maxEE = Inf), no expected-error filtering is applied. Mar recommends using c(1,1): if a read has at most maxEE expected errors, it’s okay; if more, throw away the sequence.
  • trimLeft = trim certain number of base pairs on start of each read
  • truncQ = truncate reads at the first instance of a quality score less than or equal to selected number. Chose 2
  • rm.phix = remove phi x
  • compress = make filtered files .gzipped
  • multithread = multithread
# Filter and trim the reads, writing gzipped filtered fastq files to disk.
# trimLeft = c(50, 55): removes the 50 bp forward / 55 bp reverse primer
# constructs noted below; truncLen = c(275, 275) truncates both reads to
# 275 bp; maxEE = c(1, 1) discards reads with > 1 expected error;
# truncQ = 2 truncates at the first Q <= 2 base; rm.phix removes PhiX.
# The returned matrix (reads.in / reads.out per sample) is kept for tracking.
filtered_reads <-
  filterAndTrim(fwd = forward_reads, filt = filtered_forward_reads,
              rev = reverse_reads, filt.rev = filtered_reverse_reads,
              truncLen = c(275,275), trimLeft = c(50,55),
              maxN = 0, maxEE = c(1, 1),truncQ = 2, rm.phix = TRUE,
              compress = TRUE, multithread = TRUE)

# Library prep is described in Lee et al. 2016; the primer constructs are:
# Forward 5' TCGTCGGCAG CGTCAGATGT GTATAAGAGA CAGCCTACGG GNGGCWGCAG 3' (50 bp)
# Reverse 5' GTCTCGTGGG CTCGGAGATG TGTATAAGAG ACAGGACTAC HVGGGTATCT AATCC 3' (55 bp)

Trimmed Quality Plots

# Plot the same 9 randomly ordered samples after filtering and trimming
forward_filteredQual_plot_all <- 
  plotQualityProfile(filtered_forward_reads[random_samples]) + 
  labs(title = "Trimmed Forward Read Quality")

reverse_filteredQual_plot_all <- 
  plotQualityProfile(filtered_reverse_reads[random_samples]) + 
  labs(title = "Trimmed Reverse Read Quality")

# Put the two plots together (patchwork)
forward_filteredQual_plot_all + reverse_filteredQual_plot_all

Aggregated Trimmed Plots

# Aggregate the post-filtering quality profiles across ALL samples
# Forward reads
forward_postQC_plot <-
  plotQualityProfile(filtered_forward_reads, aggregate = TRUE) +
  labs(title = "Forward Post-QC")

# Reverse reads
reverse_postQC_plot <-
  plotQualityProfile(filtered_reverse_reads, aggregate = TRUE) +
  labs(title = "Reverse Post-QC")

postQC_aggregate_plot <-
  # Combine the forward and reverse panels side by side (patchwork)
  forward_postQC_plot + reverse_postQC_plot

# Show the plot
postQC_aggregate_plot

Stats on read output from filterAndTrim

# Make the filterAndTrim output matrix into a dataframe
# (rows = samples, columns = reads.in / reads.out)
filtered_df <- as.data.frame(filtered_reads)
head(filtered_df)
##                            reads.in reads.out
## ERR3585831_trim_1.fastq.gz    67952     21348
## ERR3585834_trim_1.fastq.gz    48239     12614
## ERR3585835_trim_1.fastq.gz   130026     54352
## ERR3585837_trim_1.fastq.gz   187118     69279
## ERR3585838_trim_1.fastq.gz   208399     85011
## ERR3585840_trim_1.fastq.gz   314263     76997
#View(filtered_df)
# Calculate some summary stats.
# NOTE(review): median_percent_retained is the ratio of the two medians,
# not the median of the per-sample reads.out/reads.in ratios — confirm
# that is the intended statistic.
filtered_df %>%
  reframe(median_reads_in = median(reads.in),
          median_reads_out = median(reads.out),
          median_percent_retained = (median(reads.out)/median(reads.in)))
##   median_reads_in median_reads_out median_percent_retained
## 1          130026            56715                0.436182

43.6 percent of reads are retained. The aggregated graphs look good.

Error Modeling

Note every sequencing run needs to be run separately! The error model MUST be run separately on each illumina dataset. If you’d like to combine the datasets from multiple sequencing runs, you’ll need to do the exact same filterAndTrim() step AND, very importantly, you’ll need to have the same primer and ASV length expected by the output.

Infer error rates for all possible transitions within purines and pyrimidines (A<>G or C<>T) and transversions between all purine and pyrimidine combinations.

Error model is learned by alternating estimation of the error rates and inference of sample composition until they converge.

  1. Starts with the assumption that the error rates are the maximum (takes the most abundant sequence (“center”) and assumes it’s the only sequence not caused by errors).
  2. Compares the other sequences to the most abundant sequence.
  3. Uses at most 10^8 nucleotides for the error estimation.
  4. Uses parametric error estimation function of loess fit on the observed error rates.
# Learn the error model for the forward reads
error_forward_reads <-
  learnErrors(filtered_forward_reads, multithread = TRUE)
## 105558975 total bases in 469151 reads from 8 samples will be used for learning the error rates.
# Plot the forward read error model
forward_error_plot <-
  plotErrors(error_forward_reads, nominalQ = TRUE) + 
  labs(title = "Forward Read Error Model")

# Learn the error model for the reverse reads
error_reverse_reads <-
  learnErrors(filtered_reverse_reads, multithread = TRUE)
## 103213220 total bases in 469151 reads from 8 samples will be used for learning the error rates.
# Plot the reverse read error model
reverse_error_plot <-
  plotErrors(error_reverse_reads, nominalQ = TRUE) +
    labs(title = "Reverse Read Error Model")

# Put the two plots together (patchwork). The log-10 warnings below are
# presumably from zero-count cells on the log scale — expected, not an error.
forward_error_plot + reverse_error_plot
## Warning in scale_y_log10(): log-10 transformation introduced infinite values.
## log-10 transformation introduced infinite values.
## log-10 transformation introduced infinite values.

The error plots look pretty good. The points seem to follow the black lines.

  • The error rates for each possible transition (A→C, A→G, …) are shown in the plot above.

Details of the plot: - Points: The observed error rates for each consensus quality score.
- Black line: Estimated error rates after convergence of the machine-learning algorithm.
- Red line: The error rates expected under the nominal definition of the Q-score.

Similar to what is mentioned in the dada2 tutorial: the estimated error rates (black line) are a “reasonably good” fit to the observed rates (points), and the error rates drop with increased quality as expected. We can now infer ASVs!

Infer ASVs

An important note: This process occurs separately on forward and reverse reads! This is quite a different approach from how OTUs are identified in Mothur and also from UCHIME, oligotyping, and other OTU, MED, and ASV approaches.

# Infer forward ASVs using the learned forward error model
dada_forward <- dada(filtered_forward_reads, 
                     err = error_forward_reads,
                     multithread = TRUE)
## Sample 1 - 21348 reads in 2040 unique sequences.
## Sample 2 - 12614 reads in 1729 unique sequences.
## Sample 3 - 54352 reads in 7421 unique sequences.
## Sample 4 - 69279 reads in 9395 unique sequences.
## Sample 5 - 85011 reads in 9311 unique sequences.
## Sample 6 - 76997 reads in 8473 unique sequences.
## Sample 7 - 92835 reads in 8695 unique sequences.
## Sample 8 - 56715 reads in 7309 unique sequences.
## Sample 9 - 236 reads in 165 unique sequences.
# Infer reverse ASVs using the learned reverse error model
dada_reverse <- dada(filtered_reverse_reads, 
                     err = error_reverse_reads, 
                     multithread = TRUE)
## Sample 1 - 21348 reads in 2728 unique sequences.
## Sample 2 - 12614 reads in 2113 unique sequences.
## Sample 3 - 54352 reads in 8443 unique sequences.
## Sample 4 - 69279 reads in 10390 unique sequences.
## Sample 5 - 85011 reads in 11563 unique sequences.
## Sample 6 - 76997 reads in 11239 unique sequences.
## Sample 7 - 92835 reads in 11277 unique sequences.
## Sample 8 - 56715 reads in 8207 unique sequences.
## Sample 9 - 236 reads in 174 unique sequences.
# Inspect the first sample (largest) and the ninth (smallest, only 236 reads)
dada_forward[1]
## $ERR3585831_R1_filtered.fastq.gz
## dada-class: object describing DADA2 denoising results
## 104 sequence variants were inferred from 2040 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
dada_reverse[1]
## $ERR3585831_R2_filtered.fastq.gz
## dada-class: object describing DADA2 denoising results
## 80 sequence variants were inferred from 2728 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
dada_forward[9]
## $ERR3585846_R1_filtered.fastq.gz
## dada-class: object describing DADA2 denoising results
## 9 sequence variants were inferred from 165 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
dada_reverse[9]
## $ERR3585846_R2_filtered.fastq.gz
## dada-class: object describing DADA2 denoising results
## 9 sequence variants were inferred from 174 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16

Merge Forward and Reverse ASVs

Now, merge the forward and reverse ASVs into contigs.

# Merge forward and reverse ASVs into full-length ("contiguous") sequences
merged_ASVs <- mergePairs(dada_forward, filtered_forward_reads, 
                          dada_reverse, filtered_reverse_reads,
                          verbose = TRUE)
## 20791 paired-reads (in 104 unique pairings) successfully merged out of 21113 (in 207 pairings) input.
## 12126 paired-reads (in 101 unique pairings) successfully merged out of 12361 (in 196 pairings) input.
## 51354 paired-reads (in 361 unique pairings) successfully merged out of 53386 (in 1215 pairings) input.
## 66077 paired-reads (in 444 unique pairings) successfully merged out of 68030 (in 1249 pairings) input.
## 81614 paired-reads (in 510 unique pairings) successfully merged out of 83728 (in 1387 pairings) input.
## 74933 paired-reads (in 363 unique pairings) successfully merged out of 75996 (in 713 pairings) input.
## 89755 paired-reads (in 378 unique pairings) successfully merged out of 91593 (in 1003 pairings) input.
## 54216 paired-reads (in 417 unique pairings) successfully merged out of 55824 (in 1028 pairings) input.
## 114 paired-reads (in 4 unique pairings) successfully merged out of 152 (in 7 pairings) input.
# Evaluate the output 
typeof(merged_ASVs) # should be list
## [1] "list"
length(merged_ASVs) # should be 9
## [1] 9
names(merged_ASVs)
## [1] "ERR3585831_R1_filtered.fastq.gz" "ERR3585834_R1_filtered.fastq.gz"
## [3] "ERR3585835_R1_filtered.fastq.gz" "ERR3585837_R1_filtered.fastq.gz"
## [5] "ERR3585838_R1_filtered.fastq.gz" "ERR3585840_R1_filtered.fastq.gz"
## [7] "ERR3585843_R1_filtered.fastq.gz" "ERR3585844_R1_filtered.fastq.gz"
## [9] "ERR3585846_R1_filtered.fastq.gz"
# Inspect the merger data.frame for the third sample (ERR3585835)
head(merged_ASVs[[3]])
##                                                                                                                                                                                                                                                                                                                        sequence
## 1 AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAATATCACCTACGTGTAGGTGTTTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTTATAAGTTAGAGGTGAAATATCGGGGCTCAACCCCGAAACTGCCTCTAATACTGTAGAACTAGAGAGTAGTTGCGGTAGGCGGAATGTATAGTGTAGCGGTGAAATGCTTAGAGATTATACAGAACACCGATTGCGAA
## 2 AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAAACCCAGATACGTGTATCTGGCTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATCTCGGAGCTCAACTCCGAAACTGCCTCTAATACTGTCAAGCTAGAGAGTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGAGATCATACAGAACACCGATTGCGAA
## 3 AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAAAAACAGATACGCGTATCTGCTTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATGTCGGGGCTCAACCCCGAAACTGCCTCTAATACTGTTAGACTAGAGAGTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGATATCATACAGAACACCGATTGCGAA
## 4 AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATAGGGGTAAACTTAGGTACGTGTACCTAACTGAAAGTACTATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGGGGTGAAATACCGAGGCTCAACCTCGGAACTGCCCCTAATACTGTTGAACTAGAGAATAGTTGCTGTTGGCGGAATGTGTAGTGTAGCGGTGAAATGCTTAGATATTACACAGAACACCGATTGCGAA
## 5 GGGACGAAGGTTTTCGAATTGTAAACCCCTGTCGAATAGGACTAAACGTAAGGTTAGTAGCCTTACCTGAATTAACTATTAGAGGAAGCAGTGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGACTGCGAGCGTTACTCGGATTCACTGGGCGTAAAGGGAGCGCAGGCGGTTGTATGTGTTGATTGTGAAATCTCGGGGCTCAACTCCGAAACTGCAGTCAAAACTATACAACTAGAGTATTGGAGGGGTAAACGGAATTTCTGGTGTAGCGGTGAAATGCGCAGATATCAGAAGGAACACCGAAGGCGAA
## 6                    AGGAAGAAGGTTTTAGGATTGTAAACTTCTGTCGTAAGTGAAGAAGAATGACGGTAACTTACAAGAAAGCCCCGGCTAACTACGTGCCAGCAGCCGCGGTAATACGTAGGGGGCAAGCGTTATCCGGAATGACTGGGCGTAAAGGGAGCGTAGGCGGCTCTTTAAGTTATGTGTGAAAGCCCACAGCTCAACTGTGGAACTGCACATAAAACTGGAGAACTAGAGTGCGGGAGAGGTAAGTGGAATTCCTAGTGTAGCGGTGGAATGCGTAGATATTAGGAGGAACACCAGTGGCGAA
##   abundance forward reverse nmatch nmismatch nindel prefer accept
## 1      6664       1       1    128         0      0      2   TRUE
## 2      3743       2       2    128         0      0      1   TRUE
## 3      3245       3       3    128         0      0      1   TRUE
## 4      2616       4       4    128         0      0      1   TRUE
## 5      2303       5       5    128         0      0      1   TRUE
## 6      1782       6       7    147         0      0      1   TRUE

Create Raw ASV Count Table

# Create the ASV Count Table (rows = samples, columns = ASV sequences)
raw_ASV_table <- makeSequenceTable(merged_ASVs)

# NOTE(review): nothing is written to data/01_DADA2 here; the tables are
# written out later in the "Write 01_DADA2 files" section


# Check the type and dimensions of the data
dim(raw_ASV_table)
## [1]    9 1274
class(raw_ASV_table)
## [1] "matrix" "array"
typeof(raw_ASV_table)
## [1] "integer"
# Inspect the distribution of sequence lengths of all ASVs in dataset 
table(nchar(getSequences(raw_ASV_table)))
## 
## 290 296 297 298 299 300 301 302 304 307 310 311 312 314 315 316 317 318 319 320 
##   5  31  85  44  49 113  17  11   2   3   1   1   1   1   1  22 659   7   2   3 
## 321 322 323 324 334 352 
##  45 153  15   1   1   1
# Inspect the distribution of sequence lengths of all ASVs in dataset 
# BEFORE TRIM
data.frame(Seq_Length = nchar(getSequences(raw_ASV_table))) %>%
  ggplot(aes(x = Seq_Length )) + 
  geom_histogram() + 
  labs(title = "Raw distribution of ASV length")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

###################################################
###################################################
# TRIM THE ASVS
# Keep only ASVs of the expected length, which is 317 bp here — the dominant
# length in the table above (659 of 1274 ASVs).
# NOTE(review): an earlier comment said 355; the code keeps 317, which
# matches the observed length distribution — 317 appears correct.

# No tolerance window is applied: %in% 317 keeps exactly 317 bp
raw_ASV_table_trimmed <- raw_ASV_table[,nchar(colnames(raw_ASV_table))
                                       %in% 317]

# Inspect the distribution of sequence lengths of all ASVs in dataset 
table(nchar(getSequences(raw_ASV_table_trimmed)))
## 
## 317 
## 659
# What proportion is left of the sequences? 
sum(raw_ASV_table_trimmed)/sum(raw_ASV_table)
## [1] 0.537842
# Inspect the distribution of sequence lengths of all ASVs in dataset 
# AFTER TRIM
data.frame(Seq_Length = nchar(getSequences(raw_ASV_table_trimmed))) %>%
  ggplot(aes(x = Seq_Length )) + 
  geom_histogram() + 
  labs(title = "Trimmed distribution of ASV length")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

Taking into account the lower, zoomed-in plot: do we want to remove those extra ASVs?

Remove Chimeras

Sometimes chimeras arise in our workflow.

Chimeric sequences are artificial sequences formed by the combination of two or more distinct biological sequences. These chimeric sequences can arise during the polymerase chain reaction (PCR) amplification step of the 16S rRNA gene, where fragments from different templates can be erroneously joined together.

Chimera removal is an essential step in the analysis of 16S sequencing data to improve the accuracy of downstream analyses, such as taxonomic assignment and diversity assessment. It helps to avoid the inclusion of misleading or spurious sequences that could lead to incorrect biological interpretations.

# Remove chimeras from the length-trimmed ASV table
noChimeras_ASV_table <- removeBimeraDenovo(raw_ASV_table_trimmed, 
                                           method="consensus", 
                                           multithread=TRUE, verbose=TRUE)
## Identified 512 bimeras out of 659 input sequences.
# Check the dimensions (samples x chimera-free ASVs)
dim(noChimeras_ASV_table)
## [1]   9 147
# What proportion of reads is left relative to the trimmed table?
sum(noChimeras_ASV_table)/sum(raw_ASV_table_trimmed)
## [1] 0.9579767
# ...and relative to the raw (untrimmed) table?
sum(noChimeras_ASV_table)/sum(raw_ASV_table)
## [1] 0.5152401
# Plot the ASV length distribution after trimming + chimera removal
data.frame(Seq_Length_NoChim = nchar(getSequences(noChimeras_ASV_table))) %>%
  ggplot(aes(x = Seq_Length_NoChim )) + 
  geom_histogram()+ 
  labs(title = "Trimmed + Chimera Removal distribution of ASV length")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

Track the read counts

Here, we will look at the number of reads that were lost in the filtering, denoising, merging, and chimera removal.

# A little function to count the sequences in a dada2-style object
getN <- function(x) sum(getUniques(x))

# Make the table to track reads through each pipeline step
# (filtered_reads already holds the reads.in/reads.out columns)
track <- cbind(filtered_reads, 
               sapply(dada_forward, getN),
               sapply(dada_reverse, getN),
               sapply(merged_ASVs, getN),
               rowSums(noChimeras_ASV_table))

head(track)
##                            reads.in reads.out                        
## ERR3585831_trim_1.fastq.gz    67952     21348 21180 21239 20791  1627
## ERR3585834_trim_1.fastq.gz    48239     12614 12407 12539 12126  2891
## ERR3585835_trim_1.fastq.gz   130026     54352 53535 54148 51354 39091
## ERR3585837_trim_1.fastq.gz   187118     69279 68271 68936 66077 46788
## ERR3585838_trim_1.fastq.gz   208399     85011 84030 84609 81614 65571
## ERR3585840_trim_1.fastq.gz   314263     76997 76264 76601 74933  4114
# Update column names to be more informative (most are missing at the moment!)
colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged",
                     "nochim")
rownames(track) <- samples

# Generate a dataframe to track the reads through our DADA2 pipeline
track_counts_df <- 
  track %>%
  # make it a dataframe
  as.data.frame() %>%
  rownames_to_column(var = "names") %>%
  # percent of the raw input reads that survive the whole pipeline
  mutate(perc_reads_retained = 100 * nochim / input)

# Visualize it in table format 
DT::datatable(track_counts_df)
# Plot reads remaining at each pipeline step, one line per sample
track_counts_df %>%
  pivot_longer(input:nochim, names_to = "read_type", 
               values_to = "num_reads") %>%
  mutate(read_type = fct_relevel(read_type, 
                                 "input", "filtered", "denoisedF", "denoisedR",
                                 "merged", "nochim")) %>%
  ggplot(aes(x = read_type, y = num_reads, fill = read_type)) + 
  geom_line(aes(group = names), color = "grey") + 
  geom_point(shape = 21, size = 3, alpha = 0.8) + 
  scale_fill_brewer(palette = "Spectral") + 
  labs(x = "Filtering Step", y = "Number of Sequences") + 
  theme_bw()

We are not assigning taxonomy at this point. We will assign taxonomy after we merge this dataset with the other datasets in our meta-analysis.

Prepare the data for export!

1. ASV Table

Below, we will prepare the following:

  1. Two ASV Count tables:
    1. With ASV seqs: ASV headers include the entire ASV sequence (317 bp after length trimming).
    2. with ASV names: This includes re-written and shortened headers like ASV_1, ASV_2, etc, which will match the names in our fasta file below.
  2. ASV_fastas: A fasta file that we can use to build a tree for phylogenetic analyses (e.g. phylogenetic alpha diversity metrics or UNIFRAC dissimilarity).

Finalize ASV Count Tables

########### 2. COUNT TABLE ###############
############## Modify the ASV names and then save a fasta file!  ############## 
# Give headers more manageable names
# First pull the ASV sequences (the column names of the chimera-free table)
asv_seqs <- colnames(noChimeras_ASV_table)
asv_seqs[1:5]
## [1] "AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAAAAACAGATACGCGTATCTGCTTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATCCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATGTCGGGGCTCAACCCCGAAACTGCCTCTAATACTGTTAGACTAGAGAGTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGATATCATACAGAACACCGATTGCGAA"
## [2] "AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAATATCACCTACGTGTAGGTGTTTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTTATAAGTTAGAGGTGAAATATCGGGGCTCAACCCCGAAACTGCCTCTAATACTGTAGAACTAGAGAGTAGTTGCGGTAGGCGGAATGTATAGTGTAGCGGTGAAATGCTTAGAGATTATACAGAACACCGATTGCGAA"
## [3] "AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAAACCCAGATACGTGTATCTGGCTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATCTCGGAGCTCAACTCCGAAACTGCCTCTAATACTGTCAAGCTAGAGAGTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGAGATCATACAGAACACCGATTGCGAA"
## [4] "AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTACGAGGGTAAAATGTGGTACGTGTACCACACTGAAAGTACCGTACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCGAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATGTCGGGGCTCAACCCCGAAACTGCCTCTAATACTGTCAGACTAGAGAGTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGATATCATACAGAACACCGATTGCGAA"
## [5] "AGGATGACGGCTCTATGAGTTGTAAACTGCTTTTGTATGAGGGTAAACCCAGGTACGTGTACCTGGCTGAAAGTATCATACGAATAAGGATCGGCTAACTCCGTGCCAGCAGCCGCGGTAATACGGAGGATTCAAGCGTTATCCGGATTTATTGGGTTTAAAGGGTGCGTAGGCGGTTTGATAAGTTAGAGGTGAAATCTCGGAGCTCAACTCCGAAACTGCCTCTAATACTGTCGAACTAGAGATTAGTTGCTGTGGGCGGAATGTATGGTGTAGCGGTGAAATGCTTAGAGATCATACAGAACACCGATTGCGAA"
# Make headers for the ASV fasta file (">ASV_1", ">ASV_2", ...), which will
# also serve as the ASV names. A single vectorized paste0 over seq_len()
# replaces the original preallocate-then-fill for loop; seq_len() is also
# safe if the table ever had zero columns (1:n would yield c(1, 0)).
asv_headers <- paste0(">ASV_", seq_len(ncol(noChimeras_ASV_table)))

# Intuition check
asv_headers[1:5]
## [1] ">ASV_1" ">ASV_2" ">ASV_3" ">ASV_4" ">ASV_5"
##### Rename ASVs in table then write out our ASV fasta file! 
#View(noChimeras_ASV_table)
# Transpose so ASVs are rows and samples are columns (otu_table orientation)
asv_tab <- t(noChimeras_ASV_table)
#View(asv_tab)

## Rename our ASVs: drop the leading ">" (fasta-specific) from the row names
row.names(asv_tab) <- sub(">", "", asv_headers)
#View(asv_tab)

Write 01_DADA2 files

Now, we will write the files! We will write the following to the data/01_DADA2/05_fullLength_analysis/ folder. We will save both as files that could be submitted as supplements AND as .RData objects for easy loading into the next steps into R.:

  1. ASV_counts.tsv: ASV count table that has ASV names that are re-written and shortened headers like ASV_1, ASV_2, etc, which will match the names in our fasta file below. This will also be saved as data/01_DADA2/05_fullLength_analysis/ASV_counts.RData.
  2. ASV_counts_withSeqNames.tsv: This is generated with the data object in this file known as noChimeras_ASV_table. ASV headers include the entire ASV sequence ~250bps. In addition, we will save this as a .RData object as data/01_DADA2/05_fullLength_analysis/noChimeras_ASV_table.RData as we will use this data in analysis/02_PreProcessing.Rmd to assign the taxonomy from the sequence headers.
  3. ASVs.fasta: A fasta file output of the ASV names from ASV_counts.tsv and the sequences from the ASVs in ASV_counts_withSeqNames.tsv. A fasta file that we can use to build a tree for phylogenetic analyses (e.g. phylogenetic alpha diversity metrics or UNIFRAC dissimilarity).
  4. We will also make a copy of ASVs.fasta in data/05_fullLength_analysis/ to be used for the taxonomy classification in the next step in the workflow.
  5. track_read_counts.RData: To track how many reads we lost throughout our workflow that could be used and plotted later. We will add this to the metadata in analysis/02_PreProcessing.Rmd.
# FIRST, we will save our output as regular files, which will be useful later on. 
# NOTE(review): the prose above says these files go to
# data/01_DADA2/05_fullLength_analysis/, but the code writes to
# data/01_DADA2/Bikrim_analysis/ — confirm which path is intended.
# Write BOTH the modified and unmodified ASV tables to a file!
# Write count table with ASV numbered names (e.g. ASV_1, ASV_2, etc)
write.table(asv_tab, "data/01_DADA2/Bikrim_analysis/ASV_counts.tsv",
            sep = "\t", quote = FALSE, col.names = NA)
# Write count table with ASV sequence names
write.table(noChimeras_ASV_table,
            "data/01_DADA2/Bikrim_analysis/ASV_counts_withSeqNames.tsv",
            sep = "\t", quote = FALSE, col.names = NA)
# Write out the fasta file: rbind + c() interleaves each ">ASV_n" header
# with its sequence, giving alternating header/sequence lines
asv_fasta <- c(rbind(asv_headers, asv_seqs))
# Save to a file!
write(asv_fasta, "data/01_DADA2/Bikrim_analysis/ASVs.fasta")

# SECOND, save R objects for easy loading in analysis/02_PreProcessing.
# (saveRDS writes .RDS files; the prose above mentions .RData — confirm naming)
saveRDS(noChimeras_ASV_table,
     file = "data/01_DADA2/Bikrim_analysis/noChimeras_ASV_table.RDS")
saveRDS(asv_tab, file = "data/01_DADA2/Bikrim_analysis/ASV_counts.RDS")
# And save the track_counts_df, which we will merge with the metadata in the
# next step of the analysis in analysis/02_PreProcessing. 
saveRDS(track_counts_df, 
     file = "data/01_DADA2/Bikrim_analysis/track_read_counts.RDS")

## Session information

#Ensure reproducibility
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
##  setting  value
##  version  R version 4.4.2 (2024-10-31)
##  os       macOS Sequoia 15.7.1
##  system   x86_64, darwin20
##  ui       X11
##  language (EN)
##  collate  en_US.UTF-8
##  ctype    en_US.UTF-8
##  tz       America/New_York
##  date     2025-10-17
##  pandoc   3.4 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/x86_64/ (via rmarkdown)
##  quarto   1.6.42 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/quarto
## 
## ─ Packages ───────────────────────────────────────────────────────────────────
##  package              * version date (UTC) lib source
##  abind                  1.4-8   2024-09-12 [1] CRAN (R 4.4.1)
##  ade4                   1.7-23  2025-02-14 [1] CRAN (R 4.4.1)
##  ape                    5.8-1   2024-12-16 [1] CRAN (R 4.4.1)
##  Biobase                2.66.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  BiocGenerics           0.52.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  BiocParallel           1.40.2  2025-04-08 [1] Bioconductor
##  biomformat             1.34.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  Biostrings             2.74.1  2024-12-16 [1] Bioconductor 3.20 (R 4.4.2)
##  bitops                 1.0-9   2024-10-03 [1] CRAN (R 4.4.1)
##  bslib                  0.9.0   2025-01-30 [1] CRAN (R 4.4.1)
##  cachem                 1.1.0   2024-05-16 [1] CRAN (R 4.4.0)
##  cli                    3.6.5   2025-04-23 [1] CRAN (R 4.4.1)
##  cluster                2.1.6   2023-12-01 [2] CRAN (R 4.4.2)
##  codetools              0.2-20  2024-03-31 [2] CRAN (R 4.4.2)
##  colorspace             2.1-1   2024-07-26 [1] CRAN (R 4.4.0)
##  crayon                 1.5.3   2024-06-20 [1] CRAN (R 4.4.0)
##  crosstalk              1.2.1   2023-11-23 [1] CRAN (R 4.4.0)
##  dada2                * 1.34.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  data.table             1.17.8  2025-07-10 [1] CRAN (R 4.4.1)
##  DelayedArray           0.32.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  deldir                 2.0-4   2024-02-28 [1] CRAN (R 4.4.0)
##  devtools             * 2.4.5   2022-10-11 [1] CRAN (R 4.4.0)
##  digest                 0.6.37  2024-08-19 [1] CRAN (R 4.4.1)
##  dplyr                * 1.1.4   2023-11-17 [1] CRAN (R 4.4.0)
##  DT                   * 0.33    2024-04-04 [1] CRAN (R 4.4.0)
##  ellipsis               0.3.2   2021-04-29 [1] CRAN (R 4.4.0)
##  evaluate               1.0.4   2025-06-18 [1] CRAN (R 4.4.1)
##  farver                 2.1.2   2024-05-13 [1] CRAN (R 4.4.0)
##  fastmap                1.2.0   2024-05-15 [1] CRAN (R 4.4.0)
##  forcats              * 1.0.0   2023-01-29 [1] CRAN (R 4.4.0)
##  foreach                1.5.2   2022-02-02 [1] CRAN (R 4.4.0)
##  fs                     1.6.6   2025-04-12 [1] CRAN (R 4.4.1)
##  generics               0.1.4   2025-05-09 [1] CRAN (R 4.4.1)
##  GenomeInfoDb           1.42.3  2025-01-27 [1] Bioconductor 3.20 (R 4.4.2)
##  GenomeInfoDbData       1.2.13  2025-08-12 [1] Bioconductor
##  GenomicAlignments      1.42.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  GenomicRanges          1.58.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  ggplot2              * 3.5.2   2025-04-09 [1] CRAN (R 4.4.1)
##  glue                   1.8.0   2024-09-30 [1] CRAN (R 4.4.1)
##  gtable                 0.3.6   2024-10-25 [1] CRAN (R 4.4.1)
##  hms                    1.1.3   2023-03-21 [1] CRAN (R 4.4.0)
##  htmltools              0.5.8.1 2024-04-04 [1] CRAN (R 4.4.0)
##  htmlwidgets            1.6.4   2023-12-06 [1] CRAN (R 4.4.0)
##  httpuv                 1.6.16  2025-04-16 [1] CRAN (R 4.4.1)
##  httr                   1.4.7   2023-08-15 [1] CRAN (R 4.4.0)
##  hwriter                1.3.2.1 2022-04-08 [1] CRAN (R 4.4.0)
##  igraph                 2.1.4   2025-01-23 [1] CRAN (R 4.4.1)
##  interp                 1.1-6   2024-01-26 [1] CRAN (R 4.4.0)
##  IRanges                2.40.1  2024-12-05 [1] Bioconductor 3.20 (R 4.4.2)
##  iterators              1.0.14  2022-02-05 [1] CRAN (R 4.4.0)
##  jpeg                   0.1-11  2025-03-21 [1] CRAN (R 4.4.1)
##  jquerylib              0.1.4   2021-04-26 [1] CRAN (R 4.4.0)
##  jsonlite               2.0.0   2025-03-27 [1] CRAN (R 4.4.1)
##  knitr                  1.50    2025-03-16 [1] CRAN (R 4.4.1)
##  labeling               0.4.3   2023-08-29 [1] CRAN (R 4.4.0)
##  later                  1.4.2   2025-04-08 [1] CRAN (R 4.4.1)
##  lattice                0.22-6  2024-03-20 [2] CRAN (R 4.4.2)
##  latticeExtra           0.6-30  2022-07-04 [1] CRAN (R 4.4.0)
##  lifecycle              1.0.4   2023-11-07 [1] CRAN (R 4.4.0)
##  lubridate            * 1.9.4   2024-12-08 [1] CRAN (R 4.4.1)
##  magrittr               2.0.3   2022-03-30 [1] CRAN (R 4.4.0)
##  MASS                   7.3-61  2024-06-13 [2] CRAN (R 4.4.2)
##  Matrix                 1.7-1   2024-10-18 [2] CRAN (R 4.4.2)
##  MatrixGenerics         1.18.1  2025-01-09 [1] Bioconductor 3.20 (R 4.4.2)
##  matrixStats            1.5.0   2025-01-07 [1] CRAN (R 4.4.1)
##  memoise                2.0.1   2021-11-26 [1] CRAN (R 4.4.0)
##  mgcv                   1.9-1   2023-12-21 [2] CRAN (R 4.4.2)
##  mime                   0.13    2025-03-17 [1] CRAN (R 4.4.1)
##  miniUI                 0.1.2   2025-04-17 [1] CRAN (R 4.4.1)
##  multtest               2.62.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  nlme                   3.1-166 2024-08-14 [2] CRAN (R 4.4.2)
##  pacman                 0.5.1   2019-03-11 [1] CRAN (R 4.4.0)
##  patchwork            * 1.3.1   2025-06-21 [1] CRAN (R 4.4.1)
##  permute                0.9-8   2025-06-25 [1] CRAN (R 4.4.1)
##  phyloseq             * 1.50.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  pillar                 1.11.0  2025-07-04 [1] CRAN (R 4.4.1)
##  pkgbuild               1.4.8   2025-05-26 [1] CRAN (R 4.4.1)
##  pkgconfig              2.0.3   2019-09-22 [1] CRAN (R 4.4.0)
##  pkgload                1.4.0   2024-06-28 [1] CRAN (R 4.4.0)
##  plyr                   1.8.9   2023-10-02 [1] CRAN (R 4.4.0)
##  png                    0.1-8   2022-11-29 [1] CRAN (R 4.4.0)
##  profvis                0.4.0   2024-09-20 [1] CRAN (R 4.4.1)
##  promises               1.3.3   2025-05-29 [1] CRAN (R 4.4.1)
##  purrr                * 1.1.0   2025-07-10 [1] CRAN (R 4.4.1)
##  pwalign                1.2.0   2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  R6                     2.6.1   2025-02-15 [1] CRAN (R 4.4.1)
##  RColorBrewer           1.1-3   2022-04-03 [1] CRAN (R 4.4.0)
##  Rcpp                 * 1.1.0   2025-07-02 [1] CRAN (R 4.4.1)
##  RcppParallel           5.1.10  2025-01-24 [1] CRAN (R 4.4.1)
##  readr                * 2.1.5   2024-01-10 [1] CRAN (R 4.4.0)
##  remotes                2.5.0   2024-03-17 [1] CRAN (R 4.4.0)
##  reshape2               1.4.4   2020-04-09 [1] CRAN (R 4.4.0)
##  rhdf5                  2.50.2  2025-01-09 [1] Bioconductor 3.20 (R 4.4.2)
##  rhdf5filters           1.18.1  2025-03-06 [1] Bioconductor 3.20 (R 4.4.3)
##  Rhdf5lib               1.28.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  rlang                  1.1.6   2025-04-11 [1] CRAN (R 4.4.1)
##  rmarkdown              2.29    2024-11-04 [1] CRAN (R 4.4.1)
##  Rsamtools              2.22.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  rstudioapi             0.17.1  2024-10-22 [1] CRAN (R 4.4.1)
##  S4Arrays               1.6.0   2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  S4Vectors              0.44.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  sass                   0.4.10  2025-04-11 [1] CRAN (R 4.4.1)
##  scales                 1.4.0   2025-04-24 [1] CRAN (R 4.4.1)
##  sessioninfo            1.2.3   2025-02-05 [1] CRAN (R 4.4.1)
##  shiny                  1.11.1  2025-07-03 [1] CRAN (R 4.4.1)
##  ShortRead              1.64.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  SparseArray            1.6.2   2025-02-20 [1] Bioconductor 3.20 (R 4.4.2)
##  stringi                1.8.7   2025-03-27 [1] CRAN (R 4.4.1)
##  stringr              * 1.5.1   2023-11-14 [1] CRAN (R 4.4.0)
##  SummarizedExperiment   1.36.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  survival               3.7-0   2024-06-05 [2] CRAN (R 4.4.2)
##  tibble               * 3.3.0   2025-06-08 [1] CRAN (R 4.4.1)
##  tidyr                * 1.3.1   2024-01-24 [1] CRAN (R 4.4.0)
##  tidyselect             1.2.1   2024-03-11 [1] CRAN (R 4.4.0)
##  tidyverse            * 2.0.0   2023-02-22 [1] CRAN (R 4.4.0)
##  timechange             0.3.0   2024-01-18 [1] CRAN (R 4.4.0)
##  tzdb                   0.5.0   2025-03-15 [1] CRAN (R 4.4.1)
##  UCSC.utils             1.2.0   2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  urlchecker             1.0.1   2021-11-30 [1] CRAN (R 4.4.0)
##  usethis              * 3.1.0   2024-11-26 [1] CRAN (R 4.4.1)
##  vctrs                  0.6.5   2023-12-01 [1] CRAN (R 4.4.0)
##  vegan                  2.7-1   2025-06-05 [1] CRAN (R 4.4.1)
##  withr                  3.0.2   2024-10-28 [1] CRAN (R 4.4.1)
##  xfun                   0.52    2025-04-02 [1] CRAN (R 4.4.1)
##  xtable                 1.8-4   2019-04-21 [1] CRAN (R 4.4.0)
##  XVector                0.46.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
##  yaml                   2.3.10  2024-07-26 [1] CRAN (R 4.4.0)
##  zlibbioc               1.52.0  2024-10-29 [1] Bioconductor 3.20 (R 4.4.1)
## 
##  [1] /Users/cab565/Library/R/x86_64/4.4/library
##  [2] /Library/Frameworks/R.framework/Versions/4.4-x86_64/Resources/library
##  * ── Packages attached to the search path.
## 
## ──────────────────────────────────────────────────────────────────────────────